From a9214a8feef1f35d2b852443c158221e60b90195 Mon Sep 17 00:00:00 2001 From: "kfraser@localhost.localdomain" Date: Thu, 29 Mar 2007 19:18:43 +0100 Subject: [PATCH] Enable VMX MSR bitmap support. We use it to avoid VMExits on FS_BASE and GS_BASE MSR accesses. Signed-off-by: Weidong Han Signed-off-by: Xin Li Signed-off-by: Keir Fraser --- xen/arch/x86/hvm/hvm.c | 6 ++++++ xen/arch/x86/hvm/vmx/vmcs.c | 26 ++++++++++++++++++++++++++ xen/include/asm-x86/hvm/support.h | 1 + xen/include/asm-x86/hvm/vmx/vmcs.h | 3 +++ xen/include/asm-x86/msr.h | 4 ++-- 5 files changed, 38 insertions(+), 2 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index d01935de82..3a40d69c01 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -59,6 +59,9 @@ struct hvm_function_table hvm_funcs __read_mostly; /* I/O permission bitmap is globally shared by all HVM guests. */ char __attribute__ ((__section__ (".bss.page_aligned"))) hvm_io_bitmap[3*PAGE_SIZE]; +/* MSR permission bitmap is globally shared by all HVM guests. */ +char __attribute__ ((__section__ (".bss.page_aligned"))) + hvm_msr_bitmap[PAGE_SIZE]; void hvm_enable(struct hvm_function_table *fns) { @@ -72,6 +75,9 @@ void hvm_enable(struct hvm_function_table *fns) memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap)); clear_bit(0x80, hvm_io_bitmap); + /* All MSR accesses are intercepted by default. */ + memset(hvm_msr_bitmap, ~0, sizeof(hvm_msr_bitmap)); + hvm_funcs = *fns; hvm_enabled = 1; } diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c index e7009afb5c..a8d92dd1cf 100644 --- a/xen/arch/x86/hvm/vmx/vmcs.c +++ b/xen/arch/x86/hvm/vmx/vmcs.c @@ -61,6 +61,25 @@ static u32 adjust_vmx_controls(u32 ctl_min, u32 ctl_max, u32 msr) return ctl; } +static void disable_intercept_for_msr(u32 msr) +{ + /* + * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). + * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. 
+ */ + if ( msr <= 0x1fff ) + { + __clear_bit(msr, hvm_msr_bitmap + 0x000); /* read-low */ + __clear_bit(msr, hvm_msr_bitmap + 0x800); /* write-low */ + } + else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) ) + { + msr &= 0x1fff; + __clear_bit(msr, hvm_msr_bitmap + 0x400); /* read-high */ + __clear_bit(msr, hvm_msr_bitmap + 0xc00); /* write-high */ + } +} + void vmx_init_vmcs_config(void) { u32 vmx_msr_low, vmx_msr_high, min, max; @@ -82,6 +101,7 @@ void vmx_init_vmcs_config(void) #ifdef __x86_64__ min = max |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING; #endif + max |= CPU_BASED_ACTIVATE_MSR_BITMAP; _vmx_cpu_based_exec_control = adjust_vmx_controls( min, max, MSR_IA32_VMX_PROCBASED_CTLS_MSR); @@ -105,6 +125,9 @@ void vmx_init_vmcs_config(void) vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control; vmx_vmexit_control = _vmx_vmexit_control; vmx_vmentry_control = _vmx_vmentry_control; + + disable_intercept_for_msr(MSR_FS_BASE); + disable_intercept_for_msr(MSR_GS_BASE); } else { @@ -287,6 +310,9 @@ static void construct_vmcs(struct vcpu *v) __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control); v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control; + if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP ) + __vmwrite(MSR_BITMAP, virt_to_maddr(hvm_msr_bitmap)); + /* I/O access bitmap. 
*/ __vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap)); __vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE)); diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h index 3d7b3a43bb..fea8611bf8 100644 --- a/xen/include/asm-x86/hvm/support.h +++ b/xen/include/asm-x86/hvm/support.h @@ -215,6 +215,7 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h); /* End of save/restore */ extern char hvm_io_bitmap[]; +extern char hvm_msr_bitmap[]; extern int hvm_enabled; void hvm_enable(struct hvm_function_table *); diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h index 2fa9c333f3..077dd61919 100644 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h @@ -109,6 +109,7 @@ extern int vmcs_version; #define CPU_BASED_MOV_DR_EXITING 0x00800000 #define CPU_BASED_UNCOND_IO_EXITING 0x01000000 #define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000 +#define CPU_BASED_ACTIVATE_MSR_BITMAP 0x10000000 #define CPU_BASED_MONITOR_EXITING 0x20000000 #define CPU_BASED_PAUSE_EXITING 0x40000000 @@ -143,6 +144,8 @@ enum vmcs_field { IO_BITMAP_A_HIGH = 0x00002001, IO_BITMAP_B = 0x00002002, IO_BITMAP_B_HIGH = 0x00002003, + MSR_BITMAP = 0x00002004, + MSR_BITMAP_HIGH = 0x00002005, VM_EXIT_MSR_STORE_ADDR = 0x00002006, VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007, VM_EXIT_MSR_LOAD_ADDR = 0x00002008, diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h index 31e2afd8bf..bd7f27b6c5 100644 --- a/xen/include/asm-x86/msr.h +++ b/xen/include/asm-x86/msr.h @@ -126,8 +126,8 @@ static inline void wrmsrl(unsigned int msr, __u64 val) #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ -#define MSR_FS_BASE 0xc0000100 /* 64bit GS base */ -#define MSR_GS_BASE 0xc0000101 /* 64bit FS base */ +#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ +#define MSR_GS_BASE 
0xc0000101 /* 64bit GS base */ #define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */ /* EFER bits: */ #define _EFER_SCE 0 /* SYSCALL/SYSRET */ -- 2.30.2